{
int evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
BUG_ON(evtchn < 0);
- notify_via_evtchn(evtchn);
+ notify_remote_via_evtchn(evtchn);
}
void __send_IPI_shortcut(unsigned int shortcut, int vector)
synch_set_bit(evtchn, &s->evtchn_pending[0]);
}
+/*
+ * Notify the remote end of the event channel bound to @irq.
+ * If the irq is not bound to a valid event channel (e.g. the
+ * connection is broken, as can happen across save/restore) the
+ * notification is silently dropped.
+ */
+void notify_remote_via_irq(int irq)
+{
+ int evtchn = irq_to_evtchn[irq];
+
+ if (VALID_EVTCHN(evtchn))
+ notify_remote_via_evtchn(evtchn);
+}
+
void irq_resume(void)
{
evtchn_op_t op;
{
int evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
BUG_ON(evtchn < 0);
- notify_via_evtchn(evtchn);
+ notify_remote_via_evtchn(evtchn);
}
void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
/* Kick the relevant domain. */
- notify_via_evtchn(blkif->evtchn);
+ notify_remote_via_irq(blkif->irq);
}
void blkif_deschedule(blkif_t *blkif)
static inline void flush_requests(struct blkfront_info *info)
{
RING_PUSH_REQUESTS(&info->ring);
- notify_via_evtchn(info->evtchn);
+ notify_remote_via_irq(info->irq);
}
static void kick_pending_request_queues(struct blkfront_info *info)
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
/* Kick the relevant domain. */
- notify_via_evtchn(blkif->evtchn);
+ notify_remote_via_irq(blkif->irq);
}
static struct miscdevice blktap_miscdev = {
/* don't block - write as much as possible and return */
-static int __xencons_ring_send(struct ring_head *ring, const char *data, unsigned len)
+static int __xencons_ring_send(
+ struct ring_head *ring, const char *data, unsigned len)
{
int copied = 0;
int xencons_ring_send(const char *data, unsigned len)
{
- struct ring_head *out = outring();
- int sent = 0;
-
- sent = __xencons_ring_send(out, data, len);
- notify_via_evtchn(xen_start_info->console_evtchn);
+ int sent = __xencons_ring_send(outring(), data, len);
+ notify_remote_via_irq(xencons_irq);
return sent;
-
}
static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
static gnttab_transfer_t grant_rx_op[MAX_PENDING_REQS];
-static unsigned char rx_notify[NR_EVENT_CHANNELS];
+static unsigned char rx_notify[NR_IRQS];
/* Don't currently gate addition of an interface to the tx scheduling list. */
#define tx_work_exists(_if) (1)
{
netif_t *netif = NULL;
s8 status;
- u16 size, id, evtchn;
+ u16 size, id, irq;
multicall_entry_t *mcl;
mmu_update_t *mmu;
gnttab_transfer_t *gop;
gop->status, netif->domid);
/* XXX SMH: should free 'old_mfn' here */
status = NETIF_RSP_ERROR;
- }
- evtchn = netif->evtchn;
+ }
+ irq = netif->irq;
id = netif->rx->ring[
MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
if (make_rx_response(netif, id, status,
(unsigned long)skb->data & ~PAGE_MASK,
size, skb->proto_csum_valid) &&
- (rx_notify[evtchn] == 0)) {
- rx_notify[evtchn] = 1;
- notify_list[notify_nr++] = evtchn;
+ (rx_notify[irq] == 0)) {
+ rx_notify[irq] = 1;
+ notify_list[notify_nr++] = irq;
}
netif_put(netif);
}
while (notify_nr != 0) {
- evtchn = notify_list[--notify_nr];
- rx_notify[evtchn] = 0;
- notify_via_evtchn(evtchn);
+ irq = notify_list[--notify_nr];
+ rx_notify[irq] = 0;
+ notify_remote_via_irq(irq);
}
/* More work to do? */
mb(); /* Update producer before checking event threshold. */
if (i == netif->tx->event)
- notify_via_evtchn(netif->evtchn);
+ notify_remote_via_irq(netif->irq);
}
static int make_rx_response(netif_t *netif,
/* Only notify Xen if we really have to. */
mb();
if (np->tx->TX_TEST_IDX == i)
- notify_via_evtchn(np->evtchn);
+ notify_remote_via_irq(np->irq);
return 0;
*/
np->backend_state = BEST_CONNECTED;
wmb();
- notify_via_evtchn(np->evtchn);
+ notify_remote_via_irq(np->irq);
network_tx_buf_gc(dev);
if (np->user_state == UST_OPEN)
rc = offset;
DPRINTK("Notifying frontend via event channel %d\n",
tpmif->evtchn);
- notify_via_evtchn(tpmif->evtchn);
+ notify_remote_via_irq(tpmif->irq);
return rc;
}
DPRINTK("Notifying backend via event channel %d\n",
tp->evtchn);
- notify_via_evtchn(tp->evtchn);
+ notify_remote_via_irq(tp->irq);
spin_unlock_irq(&tp->tx_lock);
return offset;
data += avail;
len -= avail;
update_output_chunk(out, avail);
- notify_via_evtchn(xen_start_info->store_evtchn);
+ notify_remote_via_irq(xenbus_irq);
} while (len != 0);
return 0;
pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
/* If it was full, tell them we've taken some. */
if (was_full)
- notify_via_evtchn(xen_start_info->store_evtchn);
+ notify_remote_via_irq(xenbus_irq);
}
/* If we left something, wake watch thread to deal with it. */
void *dev_id);
extern void unbind_evtchn_from_irqhandler(unsigned int irq, void *dev_id);
+/*
+ * Unlike notify_remote_via_evtchn(), this is safe to use across
+ * save/restore. Notifications on a broken connection are silently dropped.
+ */
+void notify_remote_via_irq(int irq);
+
extern void irq_resume(void);
/* Entry point for notifications into Linux subsystems. */
synch_clear_bit(port, &s->evtchn_pending[0]);
}
-static inline int notify_via_evtchn(int port)
+/*
+ * Send a notification on event channel @port via the EVTCHNOP_send
+ * hypercall.  The hypercall result is deliberately discarded (hence
+ * the void return): callers that need save/restore-safe semantics
+ * should use notify_remote_via_irq() instead.
+ */
+static inline void notify_remote_via_evtchn(int port)
{
evtchn_op_t op;
op.cmd = EVTCHNOP_send;
op.u.send.local_port = port;
- return HYPERVISOR_event_channel_op(&op);
+ (void)HYPERVISOR_event_channel_op(&op);
}
/*